without undue squeezing.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
#ifdef __x86_64__
/* Resume use of ISTs now that the host TR is reinstated. */
- idt_tables[cpu][TRAP_double_fault].a |= 1UL << 32; /* IST1 */
- idt_tables[cpu][TRAP_nmi].a |= 2UL << 32; /* IST2 */
- idt_tables[cpu][TRAP_machine_check].a |= 3UL << 32; /* IST3 */
+ idt_tables[cpu][TRAP_double_fault].a |= IST_DF << 32;
+ idt_tables[cpu][TRAP_nmi].a |= IST_NMI << 32;
+ idt_tables[cpu][TRAP_machine_check].a |= IST_MCE << 32;
#endif
}
* Cannot use ISTs for NMI/#MC/#DF while we are running with the guest TR.
* But this doesn't matter: the IST is only req'd to handle SYSCALL/SYSRET.
*/
- idt_tables[cpu][TRAP_double_fault].a &= ~(3UL << 32);
- idt_tables[cpu][TRAP_nmi].a &= ~(3UL << 32);
- idt_tables[cpu][TRAP_machine_check].a &= ~(3UL << 32);
+ idt_tables[cpu][TRAP_double_fault].a &= ~(7UL << 32);
+ idt_tables[cpu][TRAP_nmi].a &= ~(7UL << 32);
+ idt_tables[cpu][TRAP_machine_check].a &= ~(7UL << 32);
#endif
svm_restore_dr(v);
void memguard_guard_stack(void *p)
{
- BUILD_BUG_ON((DEBUG_STACK_SIZE + PAGE_SIZE) > STACK_SIZE);
- p = (void *)((unsigned long)p + STACK_SIZE - DEBUG_STACK_SIZE - PAGE_SIZE);
+ BUILD_BUG_ON((PRIMARY_STACK_SIZE + PAGE_SIZE) > STACK_SIZE);
+ p = (void *)((unsigned long)p + STACK_SIZE -
+ PRIMARY_STACK_SIZE - PAGE_SIZE);
memguard_guard_range(p, PAGE_SIZE);
}
struct tss_struct init_tss[NR_CPUS];
-char __attribute__ ((__section__(".bss.page_aligned"))) cpu0_stack[STACK_SIZE];
+char __attribute__ ((__section__(".bss.stack_aligned"))) cpu0_stack[STACK_SIZE];
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
unsigned long *stack, addr;
esp_bottom = (esp | (STACK_SIZE - 1)) + 1;
- esp_top = esp_bottom - DEBUG_STACK_SIZE;
+ esp_top = esp_bottom - PRIMARY_STACK_SIZE;
printk("Valid stack range: %p-%p, sp=%p, tss.esp0=%p\n",
(void *)esp_top, (void *)esp_bottom, (void *)esp,
.data.percpu : { *(.data.percpu) } :text
__per_cpu_data_end = .;
. = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
- . = ALIGN(STACK_SIZE);
+ . = ALIGN(PAGE_SIZE);
__per_cpu_end = .;
__bss_start = .; /* BSS */
.bss : {
+ . = ALIGN(STACK_SIZE);
*(.bss.stack_aligned)
+ . = ALIGN(PAGE_SIZE);
*(.bss.page_aligned)
*(.bss)
} :text
if ( cpu == 0 )
{
- /* Specify dedicated interrupt stacks for NMIs and double faults. */
+ /* Specify dedicated interrupt stacks for NMI, #DF, and #MC. */
set_intr_gate(TRAP_double_fault, &double_fault);
- idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
- idt_table[TRAP_nmi].a |= 2UL << 32; /* IST2 */
- idt_table[TRAP_machine_check].a |= 3UL << 32; /* IST3 */
+ idt_table[TRAP_double_fault].a |= IST_DF << 32;
+ idt_table[TRAP_nmi].a |= IST_NMI << 32;
+ idt_table[TRAP_machine_check].a |= IST_MCE << 32;
/*
* The 32-on-64 hypercall entry vector is only accessible from ring 1.
stack_bottom = (char *)get_stack_bottom();
stack = (char *)((unsigned long)stack_bottom & ~(STACK_SIZE - 1));
- /* Machine Check handler has its own per-CPU 1kB stack. */
- init_tss[cpu].ist[2] = (unsigned long)&stack[1024];
+ /* IST_MAX IST pages + 1 syscall page + 1 guard page + primary stack. */
+ BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
- /* Double-fault handler has its own per-CPU 1kB stack. */
- init_tss[cpu].ist[0] = (unsigned long)&stack[2048];
+ /* Machine Check handler has its own per-CPU 4kB stack. */
+ init_tss[cpu].ist[IST_MCE - 1] = (unsigned long)&stack[IST_MCE * PAGE_SIZE];
- /* NMI handler has its own per-CPU 1kB stack. */
- init_tss[cpu].ist[1] = (unsigned long)&stack[3072];
+ /* Double-fault handler has its own per-CPU 4kB stack. */
+ init_tss[cpu].ist[IST_DF - 1] = (unsigned long)&stack[IST_DF * PAGE_SIZE];
+
+ /* NMI handler has its own per-CPU 4kB stack. */
+ init_tss[cpu].ist[IST_NMI - 1] = (unsigned long)&stack[IST_NMI * PAGE_SIZE];
/* Trampoline for SYSCALL entry from long mode. */
- stack = &stack[3072]; /* Skip the NMI and DF stacks. */
+ stack = &stack[IST_MAX * PAGE_SIZE]; /* Skip the IST stacks. */
wrmsr(MSR_LSTAR, (unsigned long)stack, ((unsigned long)stack>>32));
stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);
.data.percpu : { *(.data.percpu) } :text
__per_cpu_data_end = .;
. = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
- . = ALIGN(STACK_SIZE);
+ . = ALIGN(PAGE_SIZE);
__per_cpu_end = .;
__bss_start = .; /* BSS */
.bss : {
+ . = ALIGN(STACK_SIZE);
*(.bss.stack_aligned)
+ . = ALIGN(PAGE_SIZE);
*(.bss.page_aligned)
*(.bss)
} :text
#define MEMORY_GUARD
#endif
+#ifdef __i386__
#define STACK_ORDER 2
+#else
+#define STACK_ORDER 3
+#endif
#define STACK_SIZE (PAGE_SIZE << STACK_ORDER)
-/* Debug stack is restricted to 8kB by guard pages. */
-#define DEBUG_STACK_SIZE 8192
+/* Primary stack is restricted to 8kB by guard pages. */
+#define PRIMARY_STACK_SIZE 8192
#define CONFIG_DMA_BITSIZE 32
u8 __cacheline_filler[24];
} __cacheline_aligned __attribute__((packed));
+#ifdef __x86_64__
+# define IST_DF 1UL
+# define IST_NMI 2UL
+# define IST_MCE 3UL
+# define IST_MAX 3UL
+#endif
+
#define IDT_ENTRIES 256
extern idt_entry_t idt_table[];
extern idt_entry_t *idt_tables[];